73486a
@@ -42,6 +42,7 @@
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.mapred.TaskCompletionEvent;
+import org.apache.hadoop.mapred.TaskReport;
 
 public class HadoopJobExecHelper {
   
@@ -207,6 +208,7 @@
private boolean progress(ExecDriverTaskHandle th) throws IOException {
     StringBuilder errMsg = new StringBuilder();
     long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL);
     boolean initializing = true;
+    boolean initOutputPrinted = false;
     while (!rj.isComplete()) {
       try {
         Thread.sleep(pullInterval);
@@ -222,6 +224,41 @@
private boolean progress(ExecDriverTaskHandle th) throws IOException {
         initializing = false;
       }
 
+      if (!initOutputPrinted) {
+        SessionState ss = SessionState.get();
+
+        String logMapper;
+        String logReducer;
+
+        TaskReport[] mappers = jc.getMapTaskReports(rj.getJobID());
+        if (mappers == null) {
+          logMapper = "no information for number of mappers; ";
+        } else {
+          int numMap = mappers.length;
+          if (ss != null) {
+            ss.getHiveHistory().setTaskProperty(ss.getQueryId(), getId(),
+              Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
+          }
+          logMapper = "number of mappers: " + numMap + "; ";
+        }
+
+        TaskReport[] reducers = jc.getReduceTaskReports(rj.getJobID());
+        if (reducers == null) {
+          logReducer = "no information for number of reducers. ";
+        } else {
+          int numReduce = reducers.length;
+          if (ss != null) {
+            ss.getHiveHistory().setTaskProperty(ss.getQueryId(), getId(),
+              Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
+          }
+          logReducer = "number of reducers: " + numReduce;
+        }
+
+        console
+            .printInfo("Hadoop job information for " + getId() + ": " + logMapper + logReducer);
+        initOutputPrinted = true;
+      }
+
       RunningJob newRj = jc.getJob(rj.getJobID());
       if (newRj == null) {
         // under exceptional load, hadoop may not be able to look up status
